Transform double mapping to single mapping on vti domain.
author djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Thu, 8 Sep 2005 15:18:40 +0000 (09:18 -0600)
committer djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Thu, 8 Sep 2005 15:18:40 +0000 (09:18 -0600)
After this change I think it is possible to merge ivt.S file.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>

22 files changed:
xen/arch/ia64/linux-xen/efi.c
xen/arch/ia64/linux-xen/unaligned.c
xen/arch/ia64/vmx/mm.c
xen/arch/ia64/vmx/vlsapic.c
xen/arch/ia64/vmx/vmmu.c
xen/arch/ia64/vmx/vmx_entry.S
xen/arch/ia64/vmx/vmx_irq_ia64.c
xen/arch/ia64/vmx/vmx_ivt.S
xen/arch/ia64/vmx/vmx_minstate.h
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_process.c
xen/arch/ia64/vmx/vmx_vcpu.c
xen/arch/ia64/vmx/vtlb.c
xen/arch/ia64/xen/hyperprivop.S
xen/arch/ia64/xen/regionreg.c
xen/arch/ia64/xen/vcpu.c
xen/include/asm-ia64/mm.h
xen/include/asm-ia64/regionreg.h
xen/include/asm-ia64/vmmu.h
xen/include/asm-ia64/vmx.h
xen/include/asm-ia64/vmx_vcpu.h
xen/include/public/arch-ia64.h

index a68e95a74318ee1cb53254cd399694c043b177b6..e2ba47d0005c55da46d492d7f880dea8fb060c0a 100644 (file)
@@ -523,11 +523,21 @@ efi_get_pal_addr (void)
        return NULL;
 }
 
+
+#ifdef XEN
+void *pal_vaddr;
+#endif
+
 void
 efi_map_pal_code (void)
 {
+#ifdef XEN
+       u64 psr;
+       pal_vaddr = efi_get_pal_addr ();
+#else
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;
+#endif
 
        if (!pal_vaddr)
                return;
index 38b818bf6e54c53a22c46f11b1e36b159da07ea1..ace9c4fa059a90449780b71a4cbbfaadff37765c 100644 (file)
@@ -296,7 +296,7 @@ rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
 }
 
 #if defined(XEN) && defined(CONFIG_VTI)
-static void
+void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
 {
        struct switch_stack *sw = (struct switch_stack *) regs - 1;
@@ -359,6 +359,57 @@ set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned
     }
     ia64_set_rsc(old_rsc);
 }
+
+
+static void
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
+{
+    struct switch_stack *sw = (struct switch_stack *) regs - 1;
+    unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+    unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+    unsigned long rnats, nat_mask;
+    unsigned long on_kbs;
+    unsigned long old_rsc, new_rsc;
+    long sof = (regs->cr_ifs) & 0x7f;
+    long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+    long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+    long ridx = r1 - 32;
+
+    if (ridx >= sof) {
+        /* read of out-of-frame register returns an undefined value; 0 in our case.  */
+        DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
+        panic("wrong stack register number");
+    }
+
+    if (ridx < sor)
+        ridx = rotate_reg(sor, rrb_gr, ridx);
+
+    old_rsc=ia64_get_rsc();
+    new_rsc=old_rsc&(~(0x3));
+    ia64_set_rsc(new_rsc);
+
+    bspstore = ia64_get_bspstore();
+    bsp =kbs + (regs->loadrs >> 19); //16+3;
+
+    addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+    nat_mask = 1UL << ia64_rse_slot_num(addr);
+    rnat_addr = ia64_rse_rnat_addr(addr);
+
+    if(addr >= bspstore){
+
+        ia64_flushrs ();
+        ia64_mf ();
+        bspstore = ia64_get_bspstore();
+    }
+    *val=*addr;
+    if(bspstore < rnat_addr){
+        *nat=!!(ia64_get_rnat()&nat_mask);
+    }else{
+        *nat = !!((*rnat_addr)&nat_mask);
+    }
+    ia64_set_rsc(old_rsc);
+}
+
 #else // CONFIG_VTI
 static void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
index deeccbbe678326da17a50676a7cb9985ea54c467..7af69ebe08657099cca50256e49132b31402c85b 100644 (file)
@@ -125,7 +125,7 @@ int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom)
             entry.cl = DSIDE_TLB;
             rr = vmx_vcpu_rr(vcpu, req.ptr);
             entry.ps = rr.ps;
-            entry.key = redistribute_rid(rr.rid);
+            entry.key = rr.rid;
             entry.rid = rr.rid;
             entry.vadr = PAGEALIGN(req.ptr,entry.ps);
             sections.tr = 1;
index 041c449807046f1c0ff928276017f756c0552c1e..965944476f2ffca3ce507eb6891391d58b706834 100644 (file)
@@ -181,7 +181,7 @@ void vtm_set_itv(VCPU *vcpu)
  */
 /* Interrupt must be disabled at this point */
 
-extern u64 tick_to_ns(u64 tick);
+extern u64 cycle_to_ns(u64 cyle);
 #define TIMER_SLOP (50*1000) /* ns */  /* copy from ac_timer.c */
 void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
 {
@@ -212,7 +212,7 @@ void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
     }
     /* Both last_itc & cur_itc < itm, wait for fire condition */
     else {
-        expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
+        expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
         set_ac_timer(&vtm->vtm_timer, expires);
     }
     local_irq_restore(spsr);
index 4351065adf6f4fea3f1dc2ff9e02a995ac29ab34..fbe5380a386d27d29b76fc1da01984706a9f5830 100644 (file)
@@ -91,6 +91,10 @@ ia64_rr vmmu_get_rr(VCPU *vcpu, u64 va)
 
 void recycle_message(thash_cb_t *hcb, u64 para)
 {
+    if(hcb->ht == THASH_VHPT)
+    {
+        printk("ERROR : vhpt recycle happenning!!!\n");
+    }
     printk("hcb=%p recycled with %lx\n",hcb,para);
 }
 
@@ -237,8 +241,12 @@ alloc_pmt(struct domain *d)
  */
 void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
 {
-    u64     saved_itir, saved_ifa, saved_rr;
+#if 0
+    u64     saved_itir, saved_ifa;
+#endif
+    u64      saved_rr;
     u64     pages;
+    u64     psr;
     thash_data_t    mtlb;
     ia64_rr vrr;
     unsigned int    cl = tlb->cl;
@@ -253,12 +261,12 @@ void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
     if (mtlb.ppn == INVALID_MFN)
     panic("Machine tlb insert with invalid mfn number.\n");
 
-    __asm __volatile("rsm   psr.ic|psr.i;; srlz.i" );
-    
+    psr = ia64_clear_ic();
+#if 0
     saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
     saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
+#endif
     saved_rr = ia64_get_rr(mtlb.ifa);
-
     ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
     ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
     /* Only access memory stack which is mapped by TR,
@@ -268,19 +276,23 @@ void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
     ia64_srlz_d();
     if ( cl == ISIDE_TLB ) {
         ia64_itci(mtlb.page_flags);
-    ia64_srlz_i();
+        ia64_srlz_i();
     }
     else {
         ia64_itcd(mtlb.page_flags);
-    ia64_srlz_d();
+        ia64_srlz_d();
     }
     ia64_set_rr(mtlb.ifa,saved_rr);
     ia64_srlz_d();
+#if 0
     ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
     ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
-    __asm __volatile("ssm   psr.ic|psr.i;; srlz.i" );
+#endif
+    ia64_set_psr(psr);
+    ia64_srlz_i();
 }
 
+
 u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
 {
     u64     saved_pta, saved_rr0;
@@ -289,7 +301,6 @@ u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
     struct vcpu *v = current;
     ia64_rr vrr;
 
-    
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     saved_rr0 = ia64_get_rr(0);
     vrr.rrval = saved_rr0;
@@ -308,7 +319,7 @@ u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
 
     ia64_set_rr(0, saved_rr0);
     ia64_srlz_d();
-    local_irq_restore(psr);
+    ia64_set_psr(psr);
     return hash_addr;
 }
 
@@ -320,7 +331,7 @@ u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
     struct vcpu *v = current;
     ia64_rr vrr;
 
-    // TODO: Set to enforce lazy mode    
+    // TODO: Set to enforce lazy mode
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     saved_rr0 = ia64_get_rr(0);
     vrr.rrval = saved_rr0;
@@ -341,7 +352,6 @@ u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
     local_irq_restore(psr);
     return tag;
 }
-
 /*
  *  Purge machine tlb.
  *  INPUT
index 261fc591f47532e8ecd602fe3024c67869ae94bc..0815bcdb0cb13b252acde2e8574a3345337f47bf 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-
+#include <asm/vhpt.h>
 #include "vmx_minstate.h"
 
 /*
@@ -401,8 +401,9 @@ vmx_dorfirfi_back:
     mov b0=r16
     br.cond.sptk b0         // call the service
     ;;
-// switch rr7 and rr5
 switch_rr7:
+#ifdef XEN_DBL_MAPPING
+// switch rr7 and rr5
     adds r24=SWITCH_MRR5_OFFSET, r21
     adds r26=SWITCH_MRR6_OFFSET, r21
     adds r16=SWITCH_MRR7_OFFSET ,r21
@@ -428,6 +429,7 @@ switch_rr7:
     ;;
     srlz.i
     ;;
+#endif
 // fall through
 GLOBAL_ENTRY(ia64_vmm_entry)
 /*
@@ -470,6 +472,7 @@ GLOBAL_ENTRY(vmx_dorfirfi)
        ;;
 END(vmx_dorfirfi)
 
+#ifdef XEN_DBL_MAPPING  /* will be removed */
 
 #define VMX_PURGE_RR7  0
 #define VMX_INSERT_RR7 1
@@ -609,3 +612,180 @@ GLOBAL_ENTRY(vmx_switch_rr7)
     br.sptk rp
 END(vmx_switch_rr7)
     .align PAGE_SIZE
+
+#else
+/*
+ * in0: new rr7
+ * in1: virtual address of shared_info
+ * in2: virtual address of shared_arch_info (VPD)
+ * in3: virtual address of guest_vhpt
+ * in4: virtual address of pal code segment
+ * r8: will contain old rid value
+ */
+
+
+#define PSR_BITS_TO_CLEAR                      \
+   (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |IA64_PSR_RT |     \
+    IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |    \
+    IA64_PSR_DFL | IA64_PSR_DFH)
+#define PSR_BITS_TO_SET    IA64_PSR_BN
+
+//extern void vmx_switch_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, void *guest_vhpt, void * pal_vaddr );
+
+GLOBAL_ENTRY(vmx_switch_rr7)
+   // not sure this unwind statement is correct...
+   .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
+   alloc loc1 = ar.pfs, 5, 9, 0, 0
+1: {
+     mov r28  = in0        // copy procedure index
+     mov r8   = ip         // save ip to compute branch
+     mov loc0 = rp         // save rp
+    };;
+    .body
+    movl loc2=PERCPU_ADDR
+    ;;
+    tpa loc2 = loc2         // get physical address of per cpu date
+    ;;
+    dep loc3 = 0,in1,60,4          // get physical address of shared_info
+    dep loc4 = 0,in2,60,4          // get physical address of shared_arch_info
+    dep loc5 = 0,in3,60,4          // get physical address of guest_vhpt
+    dep loc6 = 0,in4,60,4          // get physical address of pal code
+    ;;
+    mov loc7 = psr          // save psr
+    ;;
+    mov loc8 = ar.rsc           // save RSE configuration
+    ;;
+    mov ar.rsc = 0          // put RSE in enforced lazy, LE mode
+    movl r16=PSR_BITS_TO_CLEAR
+    movl r17=PSR_BITS_TO_SET
+    ;;
+    or loc7 = loc7,r17      // add in psr the bits to set
+    ;;
+    andcm r16=loc7,r16      // removes bits to clear from psr
+    br.call.sptk.many rp=ia64_switch_mode_phys
+1:
+   // now in physical mode with psr.i/ic off so do rr7 switch
+    dep r16=-1,r0,61,3
+    ;;
+    mov rr[r16]=in0
+    srlz.d
+    ;;
+    rsm 0x6000
+    ;;
+    srlz.d
+
+    // re-pin mappings for kernel text and data
+    mov r18=KERNEL_TR_PAGE_SHIFT<<2
+    movl r17=KERNEL_START
+    ;;
+    ptr.i   r17,r18
+    ptr.d   r17,r18
+    ;;
+    mov cr.itir=r18
+    mov cr.ifa=r17
+    mov r16=IA64_TR_KERNEL
+    //mov r3=ip
+    movl r25 = PAGE_KERNEL
+    ;;
+    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+    ;;
+    or r18=r2,r25
+    ;;
+   srlz.i
+   ;;
+   itr.i itr[r16]=r18
+   ;;
+   itr.d dtr[r16]=r18
+   ;;
+
+   // re-pin mappings for per-cpu data
+
+   movl r22 = PERCPU_ADDR
+   ;;
+   mov r24=IA64_TR_PERCPU_DATA
+   or loc2 = r25,loc2          // construct PA | page properties
+   mov r23=PERCPU_PAGE_SHIFT<<2
+   ;;
+   ptr.d   r22,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=r22
+   ;;
+   itr.d dtr[r24]=loc2     // wire in new mapping...
+   ;;
+
+
+#if    0
+   // re-pin mappings for shared_info
+
+   mov r24=IA64_TR_SHARED_INFO
+   movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
+   ;;
+   or loc3 = r25,loc3          // construct PA | page properties
+   mov r23 = PAGE_SHIFT<<2
+   ;;
+   ptr.d   in1,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in1
+   ;;
+   itr.d dtr[r24]=loc3     // wire in new mapping...
+   ;;
+   // re-pin mappings for shared_arch_info
+
+   mov r24=IA64_TR_ARCH_INFO
+   or loc4 = r25,loc4          // construct PA | page properties
+   mov r23 = PAGE_SHIFT<<2
+   ;;
+   ptr.d   in2,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in2
+   ;;
+   itr.d dtr[r24]=loc4     // wire in new mapping...
+   ;;
+#endif
+
+
+   // re-pin mappings for guest_vhpt
+
+   mov r24=IA64_TR_VHPT
+   movl r25=PAGE_KERNEL
+   ;;
+   or loc5 = r25,loc5          // construct PA | page properties
+   mov r23 = VCPU_TLB_SHIFT<<2
+   ;;
+   ptr.d   in3,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in3
+   ;;
+   itr.d dtr[r24]=loc5     // wire in new mapping...
+   ;;
+
+   // re-pin mappings for PAL code section
+
+   mov r24=IA64_TR_PALCODE
+   or loc6 = r25,loc6          // construct PA | page properties
+   mov r23 = IA64_GRANULE_SHIFT<<2
+   ;;
+   ptr.i   in4,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in4
+   ;;
+   itr.i itr[r24]=loc6     // wire in new mapping...
+   ;;
+
+   // done, switch back to virtual and return
+   mov r16=loc7            // r16= original psr
+   br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+   mov ar.pfs = loc1
+   mov rp = loc0
+   ;;
+   mov ar.rsc=loc8         // restore RSE configuration
+   srlz.d              // seralize restoration of psr.l
+   br.ret.sptk.many rp
+END(vmx_switch_rr7)
+#endif
+
index 281da3482eef84dcb8212f5b53916b528b5bfa2d..19827329b2148c220332c2982ea6dcd514a5f6aa 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#ifdef CONFIG_SMP
+#   define IS_RESCHEDULE(vec)   (vec == IA64_IPI_RESCHEDULE)
+#else
+#   define IS_RESCHEDULE(vec)   (0)
+#endif
+
 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
 #endif
index cec1730821155848c771d95e7e12655f74069069..b78489925b541e9d7e1365ea7838fd8ff181cc93 100644 (file)
@@ -118,10 +118,12 @@ ENTRY(vmx_itlb_miss)
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk vmx_fault_1
+(p6) br.sptk vmx_alt_itlb_miss_1
+//(p6) br.sptk vmx_fault_1
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
+    ;;
     ttag r20 = r16
     ;;
 vmx_itlb_loop:
@@ -180,10 +182,12 @@ ENTRY(vmx_dtlb_miss)
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_2
+    (p6)br.sptk vmx_alt_dtlb_miss_1
+//(p6)br.sptk vmx_fault_2
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
+    ;;
     ttag r20 = r16
     ;;
 vmx_dtlb_loop:
@@ -243,6 +247,7 @@ ENTRY(vmx_alt_itlb_miss)
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
 (p7)br.sptk vmx_fault_3
+vmx_alt_itlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r24=cr.ipsr
@@ -272,6 +277,7 @@ ENTRY(vmx_alt_dtlb_miss)
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
 (p7)br.sptk vmx_fault_4
+vmx_alt_dtlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r20=cr.isr
index 0356fdbf8d5d80d0bc55e13ff6df6c4a4f2803ee..e82e61dee30b96448354876497535a69db0bed10 100644 (file)
  * Note that psr.ic is NOT turned on by this macro.  This is so that
  * we can pass interruption state as arguments to a handler.
  */
-#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
+#ifdef XEN_DBL_MAPPING
+#define SAVE_MIN_CHANGE_RR  \
 /*  switch rr7 */       \
     movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
     movl r17=(7<<61);        \
     movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
     movl r22=(6<<61);        \
-    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);                \
-    movl r23=(5<<61);  \
+    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);     \
+    movl r23=(5<<61);   \
     ;;              \
     mov rr[r17]=r16;             \
-    mov rr[r22]=r20;            \
-    mov rr[r23]=r18;            \
+    mov rr[r22]=r20;         \
+    mov rr[r23]=r18;         \
     ;;      \
     srlz.i;      \
-    ;;  \
+    ;;
+
+#else
+
+#define SAVE_MIN_CHANGE_RR
+
+#endif
+
+#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
+    SAVE_MIN_CHANGE_RR;      \
     VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
     mov r27=ar.rsc;         /* M */                         \
     mov r20=r1;         /* A */                         \
index d7a51c9f604b8491f14a7885e4480b9e462cc4ea..74e08f78b391867d7fa017cfd10748b031b722f5 100644 (file)
@@ -28,7 +28,6 @@
 #include <xen/sched.h>
 #include <asm/pgtable.h>
 
-
 int valid_mm_mode[8] = {
     GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
     INV_MODE,
@@ -215,13 +214,13 @@ void
 vmx_init_all_rr(VCPU *vcpu)
 {
        VMX(vcpu,vrr[VRN0]) = 0x38;
-       VMX(vcpu,vrr[VRN1]) = 0x38;
-       VMX(vcpu,vrr[VRN2]) = 0x38;
-       VMX(vcpu,vrr[VRN3]) = 0x38;
-       VMX(vcpu,vrr[VRN4]) = 0x38;
-       VMX(vcpu,vrr[VRN5]) = 0x38;
-       VMX(vcpu,vrr[VRN6]) = 0x60;
-       VMX(vcpu,vrr[VRN7]) = 0x60;
+       VMX(vcpu,vrr[VRN1]) = 0x138;
+       VMX(vcpu,vrr[VRN2]) = 0x238;
+       VMX(vcpu,vrr[VRN3]) = 0x338;
+       VMX(vcpu,vrr[VRN4]) = 0x438;
+       VMX(vcpu,vrr[VRN5]) = 0x538;
+       VMX(vcpu,vrr[VRN6]) = 0x660;
+       VMX(vcpu,vrr[VRN7]) = 0x760;
 
        VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
        VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
@@ -234,10 +233,8 @@ vmx_load_all_rr(VCPU *vcpu)
        unsigned long psr;
        ia64_rr phy_rr;
 
-       psr = ia64_clear_ic();
+       local_irq_save(psr);
 
-       phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
-       phy_rr.ve = 1;
 
        /* WARNING: not allow co-exist of both virtual mode and physical
         * mode in same region
@@ -245,9 +242,15 @@ vmx_load_all_rr(VCPU *vcpu)
        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
-               phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
+               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
+       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+       phy_rr.ve = 1;
+
                ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
-               phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
+               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
+       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+           phy_rr.ve = 1;
+
                ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
@@ -265,6 +268,18 @@ vmx_load_all_rr(VCPU *vcpu)
        ia64_set_rr((VRN3 << VRN_SHIFT),
                     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
 #endif
+#ifndef XEN_DBL_MAPPING
+    extern void * pal_vaddr;
+    ia64_set_rr((VRN5 << VRN_SHIFT),
+            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+    ia64_set_rr((VRN6 << VRN_SHIFT),
+            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+    vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
+                (void *)vcpu->vcpu_info->arch.privregs,
+                ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
+    ia64_set_pta(vcpu->arch.arch_vmx.mpta);
+#endif
+
        ia64_srlz_d();
        ia64_set_psr(psr);
     ia64_srlz_i();
@@ -276,15 +291,17 @@ switch_to_physical_rid(VCPU *vcpu)
     UINT64 psr;
     ia64_rr phy_rr;
 
-    phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
-    phy_rr.ve = 1;
 
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
-    phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
+    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
+    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+    phy_rr.ve = 1;
     ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
-    phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
+    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
+    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+    phy_rr.ve = 1;
     ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
 
index 0456d9d0019f944755e097ece4b5ee7a8cc802aa..5cb6a78cd0c31a37204d8365c3523188d29161e8 100644 (file)
@@ -41,7 +41,7 @@
 #include <asm/regionreg.h>
 #include <asm/privop.h>
 #include <asm/ia64_int.h>
-#include <asm/hpsim_ssc.h>
+//#include <asm/hpsim_ssc.h>
 #include <asm/dom_fw.h>
 #include <asm/vmx_vcpu.h>
 #include <asm/kregs.h>
index 24d468fbdd54e634c09c6aad0e48efdec289a318..913a316f3b123aad17d2e1864ce5d1366c265186 100644 (file)
@@ -215,6 +215,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
     ia64_rr oldrr,newrr;
     thash_cb_t *hcb;
+    extern void * pal_vaddr;
     oldrr=vmx_vcpu_rr(vcpu,reg);
     newrr.rrval=val;
 #if 1
@@ -224,7 +225,9 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
     }
 #endif
     VMX(vcpu,vrr[reg>>61]) = val;
+
     switch((u64)(reg>>61)) {
+#ifdef XEN_DBL_MAPPING
     case VRN5:
         VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val);
         break;
@@ -234,12 +237,17 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
     case VRN7:
         VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
         /* Change double mapping for this domain */
-#ifdef XEN_DBL_MAPPING
         vmx_change_double_mapping(vcpu,
                       vmx_vrrtomrr(vcpu,oldrr.rrval),
                       vmx_vrrtomrr(vcpu,newrr.rrval));
-#endif
         break;
+#else
+    case VRN7:
+       vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
+        (void *)vcpu->vcpu_info->arch.privregs,
+       ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
+       break;
+#endif
     default:
         ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
         break;
index 78ee2983195ff775811050f3c246445c71b4aab6..3307297d2ebb4d120ece1edb8ef1a6f2ec152374 100644 (file)
@@ -343,7 +343,7 @@ thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
                 hcb->recycle_notifier(hcb,(u64)entry);
         }
         thash_purge_all(hcb);
-        cch = cch_alloc(hcb);
+//        cch = cch_alloc(hcb);
     }
     return cch;
 }
@@ -364,7 +364,7 @@ void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
     ia64_rr vrr;
     u64 gppn;
     u64 ppns, ppne;
-    
+
     hash_table = (hcb->hash_func)(hcb->pta,
                         va, entry->rid, entry->ps);
     if( INVALID_ENTRY(hcb, hash_table) ) {
@@ -374,10 +374,14 @@ void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
     else {
         // TODO: Add collision chain length limitation.
         cch = __alloc_chain(hcb,entry);
-        
-        *cch = *hash_table;
-        *hash_table = *entry;
-        hash_table->next = cch;
+        if(cch == NULL){
+            *hash_table = *entry;
+            hash_table->next = 0;
+        }else{
+            *cch = *hash_table;
+            *hash_table = *entry;
+            hash_table->next = cch;
+        }
     }
     if(hcb->vcpu->domain->domain_id==0){
        thash_insert(hcb->ts->vhpt, entry, va);
@@ -396,26 +400,29 @@ void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 
 static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
-    thash_data_t    *hash_table, *cch;
+    thash_data_t   vhpt_entry, *hash_table, *cch;
     ia64_rr vrr;
-    
+    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
+        panic("Can't convert to machine VHPT entry\n");
+    }
     hash_table = (hcb->hash_func)(hcb->pta,
                         va, entry->rid, entry->ps);
     if( INVALID_ENTRY(hcb, hash_table) ) {
-        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
-            panic("Can't convert to machine VHPT entry\n");
-        }
+        *hash_table = vhpt_entry;
         hash_table->next = 0;
     }
     else {
         // TODO: Add collision chain length limitation.
         cch = __alloc_chain(hcb,entry);
-        
-        *cch = *hash_table;
-        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
-            panic("Can't convert to machine VHPT entry\n");
+        if(cch == NULL){
+            *hash_table = vhpt_entry;
+            hash_table->next = 0;
+        }else{
+            *cch = *hash_table;
+            *hash_table = vhpt_entry;
+            hash_table->next = cch;
         }
-        hash_table->next = cch;
+
         if(hash_table->tag==hash_table->next->tag)
             while(1);
     }
@@ -488,10 +495,10 @@ static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
 {
     thash_data_t *next;
 
-    if ( ++cch_depth > MAX_CCH_LENGTH ) {
-        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
-        while(1);
-   }
+//    if ( ++cch_depth > MAX_CCH_LENGTH ) {
+//        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
+//        while(1);
+//   }
     if ( cch -> next ) {
         next = thash_rem_cch(hcb, cch->next);
     }
@@ -914,7 +921,7 @@ void thash_init(thash_cb_t *hcb, u64 sz)
         INVALIDATE_HASH(hcb,hash_table);
     }
 }
-
+#define VTLB_DEBUG
 #ifdef  VTLB_DEBUG
 static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 u64  sanity_check=0;
index d823dde6e1bf6eca9b2c4bac21c63882a38436cd..dd8de48c2da98ac06c1988581636193d13d932d2 100644 (file)
 #undef RFI_TO_INTERRUPT // not working yet
 #endif
 
+#define    XEN_HYPER_RFI           0x1
+#define    XEN_HYPER_RSM_DT        0x2
+#define    XEN_HYPER_SSM_DT        0x3
+#define    XEN_HYPER_COVER         0x4
+#define    XEN_HYPER_ITC_D         0x5
+#define    XEN_HYPER_ITC_I         0x6
+#define    XEN_HYPER_SSM_I         0x7
+#define    XEN_HYPER_GET_IVR       0x8
+#define    XEN_HYPER_GET_TPR       0x9
+#define    XEN_HYPER_SET_TPR       0xa
+#define    XEN_HYPER_EOI           0xb
+#define    XEN_HYPER_SET_ITM       0xc
+#define    XEN_HYPER_THASH         0xd
+#define    XEN_HYPER_PTC_GA        0xe
+#define    XEN_HYPER_ITR_D         0xf
+#define    XEN_HYPER_GET_RR        0x10
+#define    XEN_HYPER_SET_RR        0x11
+
 #ifdef CONFIG_SMP
 #warning "FIXME: ptc.ga instruction requires spinlock for SMP"
 #undef FAST_PTC_GA
index c845310c5f851bcc2d53ef8ecf973c4e75acc1bb..3297aec102ef3288f2f0ebc6f1a5b6524b654c0e 100644 (file)
@@ -51,7 +51,7 @@ ia64_set_rr (unsigned long rr, unsigned long rrv)
 // use this to allocate a rid out of the "Xen reserved rid block"
 unsigned long allocate_reserved_rid(void)
 {
-       static unsigned long currentrid = XEN_DEFAULT_RID;
+       static unsigned long currentrid = XEN_DEFAULT_RID+1;
        unsigned long t = currentrid;
 
        unsigned long max = RIDS_PER_RIDBLOCK;
index de22cd825eff3f3b12b57e94a5b6079fafcdc838..2fbad64c9a572d4437649370bbf6d4275c5f76a4 100644 (file)
@@ -1037,7 +1037,7 @@ void vcpu_set_next_timer(VCPU *vcpu)
 #endif
 
        if (is_idle_task(vcpu->domain)) {
-               printf("****** vcpu_set_next_timer called during idle!!\n");
+//             printf("****** vcpu_set_next_timer called during idle!!\n");
                vcpu_safe_set_itm(s);
                return;
        }
index 36e8a699ea3529bf0761907f2be05e0b04e41e1d..f7d5af368ee389c899018344afb026f5df781fde 100644 (file)
@@ -163,8 +163,8 @@ static inline int get_page(struct pfn_info *page,
            unlikely((nx & PGC_count_mask) == 0) ||     /* Count overflow? */
            unlikely((x >> 32) != _domain)) {           /* Wrong owner? */
            DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
-               page_to_pfn(page), domain, unpickle_domptr(d),
-               x, page->u.inuse.typeinfo);
+               page_to_pfn(page), domain, unpickle_domptr(domain),
+               x, page->u.inuse.type_info);
            return 0;
        }
     }
index ed35dcff355aef51fc25dbc0b6e39fbd76ad9266..c8915c164c0a1f2e88b20f3cd132ddec3f2d80cd 100644 (file)
@@ -55,8 +55,8 @@ vmMangleRID(unsigned long RIDVal)
 
        t.uint = RIDVal;
        tmp = t.bytes[1];
-       t.bytes[1] = t.bytes[3];
-       t.bytes[3] = tmp;
+       t.bytes[1] = t.bytes[2];
+       t.bytes[2] = tmp;
 
        return t.uint;
 }
index 2129e7c76715ba3af97aab1228b356fd7e73298c..5fe98c438d410bf0c0cfc5bc78070e48b7fc458d 100644 (file)
@@ -225,8 +225,8 @@ typedef struct thash_cb {
            INVALID_ENTRY(hcb, hash) = 1;        \
            hash->next = NULL; }
 
-#define PURGABLE_ENTRY(hcb,en)  \
-               ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
+#define PURGABLE_ENTRY(hcb,en)  1
+//             ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
 
 
 /*
index c740045bf779b7e54cb5970884345ea05c196b03..c0a6554b31bc09deae2882f8c41d34817869f75b 100644 (file)
@@ -29,7 +29,6 @@ extern void identify_vmx_feature(void);
 extern unsigned int vmx_enabled;
 extern void vmx_init_env(void);
 extern void vmx_final_setup_domain(struct domain *d);
-extern void vmx_init_double_mapping_stub(void);
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
 extern void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c);
@@ -37,6 +36,7 @@ extern void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c);
 extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
 extern void vmx_purge_double_mapping(u64, u64, u64);
 extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
+extern void vmx_init_double_mapping_stub(void);
 #endif
 
 extern void vmx_wait_io(void);
index f1c7dbc0ad9911960010f9cc04af563ed0e39b6a..a21b2734120e549a9191904d1b237ee47278d53a 100644 (file)
@@ -593,9 +593,10 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
     VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
     return (IA64_NO_FAULT);
 }
-
+#if 0
 /* Another hash performance algorithm */
 #define redistribute_rid(rid)  (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
+#endif
 static inline unsigned long
 vmx_vrrtomrr(VCPU *v, unsigned long val)
 {
@@ -603,14 +604,14 @@ vmx_vrrtomrr(VCPU *v, unsigned long val)
     u64          rid;
 
     rr.rrval=val;
-    rr.rid = vmMangleRID(v->arch.starting_rid  + rr.rid);
+    rr.rid = rr.rid + v->arch.starting_rid;
+    rr.ve = 1;
+    return  vmMangleRID(rr.rrval);
 /* Disable this rid allocation algorithm for now */
 #if 0
     rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
     rr.rid = redistribute_rid(rid);
 #endif 
 
-    rr.ve=1;
-    return rr.rrval;
 }
 #endif
index f60bd817ea56a32286de1ce980e64b86a67682e3..94e6a2964bc61b8606216c6bcf854b724a70e2e6 100644 (file)
@@ -280,22 +280,4 @@ typedef struct vcpu_guest_context {
 
 #endif /* !__ASSEMBLY__ */
 
-#define        XEN_HYPER_RFI                   0x1
-#define        XEN_HYPER_RSM_DT                0x2
-#define        XEN_HYPER_SSM_DT                0x3
-#define        XEN_HYPER_COVER                 0x4
-#define        XEN_HYPER_ITC_D                 0x5
-#define        XEN_HYPER_ITC_I                 0x6
-#define        XEN_HYPER_SSM_I                 0x7
-#define        XEN_HYPER_GET_IVR               0x8
-#define        XEN_HYPER_GET_TPR               0x9
-#define        XEN_HYPER_SET_TPR               0xa
-#define        XEN_HYPER_EOI                   0xb
-#define        XEN_HYPER_SET_ITM               0xc
-#define        XEN_HYPER_THASH                 0xd
-#define        XEN_HYPER_PTC_GA                0xe
-#define        XEN_HYPER_ITR_D                 0xf
-#define        XEN_HYPER_GET_RR                0x10
-#define        XEN_HYPER_SET_RR                0x11
-
 #endif /* __HYPERVISOR_IF_IA64_H__ */